}
#else /* !defined(CONFIG_X86_64) */
if ( supervisor_mode_kernel && cpu_has_sep )
- wrmsr(MSR_IA32_SYSENTER_ESP, &init_tss[smp_processor_id()].esp1, 0);
+ wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
#endif
/* Maybe load the debug registers. */
void __cpuinit cpu_init(void)
{
int cpu = smp_processor_id();
- struct tss_struct *t = &init_tss[cpu];
+ struct tss_struct *t = &this_cpu(init_tss);
struct desc_ptr gdt_desc = {
.base = (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
.limit = LAST_RESERVED_GDT_BYTE
static inline void switch_kernel_stack(struct vcpu *v)
{
- struct tss_struct *tss = &init_tss[smp_processor_id()];
+ struct tss_struct *tss = &this_cpu(init_tss);
tss->esp1 = v->arch.guest_context.kernel_sp;
tss->ss1 = v->arch.guest_context.kernel_ss;
}
__vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
__vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
- __vmwrite(HOST_TR_BASE, (unsigned long)&init_tss[cpu]);
+ __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(init_tss, cpu));
__vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());
= boot_cpu_compat_gdt_table;
#endif
-struct tss_struct init_tss[NR_CPUS];
+DEFINE_PER_CPU(struct tss_struct, init_tss);
char __attribute__ ((__section__(".bss.stack_aligned"))) cpu0_stack[STACK_SIZE];
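(Aside, not part of the patch.) The hunks above and below switch between two accessor spellings for the same per-CPU object: per_cpu(init_tss, cpu) names an explicit CPU, this_cpu(init_tss) the local one. A minimal sketch of the difference, assuming Xen's usual percpu.h macros and the DECLARE_PER_CPU() from the header hunk at the end of this patch; the helper name show_tss_location() is made up for illustration:

    /* Illustrative only; not part of the patch. */
    static void show_tss_location(unsigned int cpu)
    {
        struct tss_struct *remote = &per_cpu(init_tss, cpu);  /* named CPU   */
        struct tss_struct *local  = &this_cpu(init_tss);      /* current CPU */

        printk("TSS for CPU%u at %p, local CPU's TSS at %p\n",
               cpu, remote, local);
    }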
printk("Valid stack range: %p-%p, sp=%p, tss.esp0=%p\n",
(void *)esp_top, (void *)esp_bottom, (void *)esp,
- (void *)init_tss[cpu].esp0);
+ (void *)per_cpu(init_tss, cpu).esp0);
/* Trigger overflow trace if %esp is within 512 bytes of the guard page. */
if ( ((unsigned long)(esp - esp_top) > 512) &&
void load_TR(void)
{
- struct tss_struct *tss = &init_tss[smp_processor_id()];
+ struct tss_struct *tss = &this_cpu(init_tss);
struct desc_ptr old_gdt, tss_gdt = {
.base = (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
.limit = LAST_RESERVED_GDT_BYTE
long do_stack_switch(unsigned long ss, unsigned long esp)
{
- int nr = smp_processor_id();
- struct tss_struct *t = &init_tss[nr];
+ struct tss_struct *t = &this_cpu(init_tss);
fixup_guest_stack_selector(current->domain, ss);
movl $PER_CPU_GDT_ENTRY*8,%ecx
lsll %ecx,%ecx
- shll $7,%ecx # Each TSS entry is 0x80 bytes
- addl $init_tss,%ecx
+ shll $PERCPU_SHIFT,%ecx # Scale CPU id by per-CPU area size
+ addl $per_cpu__init_tss,%ecx # %ecx = &per_cpu(init_tss, cpu)
# Load Xen stack from TSS.
movw TSS_ss0(%ecx),%ax
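(Aside, not part of the patch.) The assembly above recovers the CPU number from the limit field of the per-CPU GDT entry (the lsll), then open-codes per_cpu(init_tss, cpu): CPU n's copy of init_tss sits n << PERCPU_SHIFT bytes past CPU 0's per_cpu__init_tss symbol. A rough C equivalent, with that contiguous-layout detail treated as an assumption here:

    /* Illustrative only: what the shll/addl pair computes. */
    static struct tss_struct *tss_of(unsigned int cpu)
    {
        unsigned long base = (unsigned long)&per_cpu__init_tss;

        /* cpu << PERCPU_SHIFT selects that CPU's per-CPU block, exactly
         * like "shll $PERCPU_SHIFT,%ecx; addl $per_cpu__init_tss,%ecx". */
        return (struct tss_struct *)(base + ((unsigned long)cpu << PERCPU_SHIFT));
    }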
asm ( "lsll %1, %0" : "=r" (cpu) : "rm" (PER_CPU_GDT_ENTRY << 3) );
/* Find information saved during fault and dump it to the console. */
- tss = &init_tss[cpu];
+ tss = &per_cpu(init_tss, cpu);
printk("*** DOUBLE FAULT ***\n");
print_xen_info();
printk("CPU: %d\nEIP: %04x:[<%08x>]",
BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
/* Machine Check handler has its own per-CPU 4kB stack. */
- init_tss[cpu].ist[IST_MCE] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];
+ this_cpu(init_tss).ist[IST_MCE] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];
/* Double-fault handler has its own per-CPU 4kB stack. */
- init_tss[cpu].ist[IST_DF] = (unsigned long)&stack[IST_DF * PAGE_SIZE];
+ this_cpu(init_tss).ist[IST_DF] = (unsigned long)&stack[IST_DF * PAGE_SIZE];
/* NMI handler has its own per-CPU 4kB stack. */
- init_tss[cpu].ist[IST_NMI] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];
+ this_cpu(init_tss).ist[IST_NMI] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];
/* Trampoline for SYSCALL entry from long mode. */
stack = &stack[IST_MAX * PAGE_SIZE]; /* Skip the IST stacks. */
extern idt_entry_t idt_table[];
extern idt_entry_t *idt_tables[];
-extern struct tss_struct init_tss[NR_CPUS];
+DECLARE_PER_CPU(struct tss_struct, init_tss);
extern void init_int80_direct_trap(struct vcpu *v);
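(Aside, not part of the patch.) With DECLARE_PER_CPU() in the header and DEFINE_PER_CPU() in the setup code, any file including this header reaches the TSS through the accessors rather than the removed init_tss[NR_CPUS] array. A hedged example of typical use; the wrapper name is invented, and the esp0 field follows the 32-bit struct layout (the 64-bit struct uses rsp0 instead):

    /* Illustrative only: set the ring-0 stack pointer in the local CPU's TSS. */
    static inline void set_local_esp0(unsigned long esp0)
    {
        this_cpu(init_tss).esp0 = esp0;
    }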